for ( i = 0; i < 1<<order; i++ )
{
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+ struct vcpu *v;
+ for_each_vcpu(d, v)
+ {
+ /* No longer safe to look for a writeable mapping in this shadow */
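+ /* (A multi-page shadow spans frames smfn ... smfn + (1<<order) - 1,
+ * so the cached MFN must be checked against every frame freed here.) */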
+ if ( v->arch.shadow.last_writeable_pte_smfn == mfn_x(smfn) + i )
+ v->arch.shadow.last_writeable_pte_smfn = 0;
+ }
+#endif
/* Strip out the type: this is now a free shadow page */
pg[i].count_info = 0;
/* Remember the TLB timestamp so we will know whether to flush
 * TLBs when we reuse the page */
unsigned long gfn;
/* Heuristic: there is likely to be only one writeable mapping,
* and that mapping is likely to be in the current pagetable,
- * either in the guest's linear map (linux, windows) or in a
- * magic slot used to map high memory regions (linux HIGHTPTE) */
+ * in the guest's linear map (on non-HIGHPTE linux and windows) */
#define GUESS(_a, _h) do { \
- if ( v->arch.shadow.mode->guess_wrmap(v, (_a), gmfn) ) \
- perfc_incrc(shadow_writeable_h_ ## _h); \
+ if ( v->arch.shadow.mode->guess_wrmap(v, (_a), gmfn) ) \
+ perfc_incrc(shadow_writeable_h_ ## _h); \
if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \
return 1; \
} while (0)
#endif /* CONFIG_PAGING_LEVELS >= 3 */
#undef GUESS
+ }
+
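+ /* Re-check: if no writeable mappings remain, we are done. */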
+ if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )
+ return 1;
+
+ /* Second heuristic: on HIGHPTE linux, there are two particular PTEs
+ * (entries in the fixmap) where linux maps its pagetables. Since
+ * we expect to hit them most of the time, we start the search for
+ * the writeable mapping by looking at the same MFN where the last
+ * brute-force search succeeded. */
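+ /* (Linux's kmap_atomic() maps PTE pages through a fixed pair of
+ * fixmap slots, which is why the same shadow l1 tends to hold the
+ * writeable mapping each time.) */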
+ if ( v->arch.shadow.last_writeable_pte_smfn != 0 )
+ {
+ unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
+ mfn_t last_smfn = _mfn(v->arch.shadow.last_writeable_pte_smfn);
+ int shtype = (mfn_to_page(last_smfn)->count_info & PGC_SH_type_mask)
+ >> PGC_SH_type_shift;
+
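+ /* Invoke the per-shadow-type write-access remover on just this
+ * one shadow, in the hope of avoiding the brute-force walk below. */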
+ if ( callbacks[shtype] )
+ callbacks[shtype](v, last_smfn, gmfn);
+
+ if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
+ perfc_incrc(shadow_writeable_h_5);
}
-#endif
+
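+ /* If that got the last writeable mapping, skip the brute-force walk. */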
+ if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )
+ return 1;
+
+#endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
/* Brute-force search of all the shadows, by walking the hash */
perfc_incrc(shadow_writeable_bf);
{
SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
gfn_x(gfn), PGC_SH_fl1_shadow, mfn_x(smfn));
-
shadow_hash_delete(v, gfn_x(gfn),
PGC_SH_fl1_shadow >> PGC_SH_type_shift, smfn);
}
shadow_l1e_t *sl1e;
int done = 0;
int flags;
+ mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
{
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
{
shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+ /* Remember the last shadow in which we shot down a writeable mapping */
+ v->arch.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
+#endif
if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info
& PGT_count_mask) == 0 )
/* This breaks us cleanly out of the FOREACH macro */
PERFCOUNTER_CPU(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
PERFCOUNTER_CPU(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
PERFCOUNTER_CPU(shadow_writeable_h_4, "shadow writeable: 32b linux low")
+PERFCOUNTER_CPU(shadow_writeable_h_5, "shadow writeable: 32b linux high")
PERFCOUNTER_CPU(shadow_writeable_bf, "shadow writeable brute-force")
PERFCOUNTER_CPU(shadow_mappings, "shadow removes all mappings")
PERFCOUNTER_CPU(shadow_mappings_bf, "shadow rm-mappings brute-force")